Segal
https://drive.google.com/file/d/14qTfcZfom6zvoJTMz_KQqL9ZPepqVoF3/view
A CKPT file is a checkpoint created by PyTorch Lightning, a research framework built on top of PyTorch. It contains a dump of a machine learning model's state; developers write CKPT files to preserve intermediate states of a model while training it to its final state. That means it should hold both the weights and the biases. Treating it as a TensorFlow checkpoint fails immediately:

Could not open step-000029999.ckpt: DATA_LOSS: not an sstable (bad magic number): perhaps your file is in a different file format and you need to use a different restore operator?
Traceback (most recent call last):
  File "/home/deuterium/.local/lib/python3.10/site-packages/tensorflow/python/training/py_checkpoint_reader.py", line 92, in NewCheckpointReader
    return CheckpointReader(compat.as_bytes(filepattern))
RuntimeError: Unable to open table file step-000029999.ckpt: DATA_LOSS: not an sstable (bad magic number): perhaps your file is in a different file format and you need to use a different restore operator?

Loading it as a Keras model instead does not work either:

model = tf.keras.models.load_model("step-000029999.ckpt")
    fid = h5f.open(name, flags, fapl=fapl)
  File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
  File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
  File "h5py/h5f.pyx", line 106, in h5py.h5f.open
OSError: Unable to open file (file signature not found)

So the file is neither a TensorFlow checkpoint nor an HDF5/Keras model.
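A quick way to confirm what it actually is (my own addition, not from the original notes) is to peek at the first few bytes. A modern torch.save archive uses the zip container format and starts with the signature PK\x03\x04, while an HDF5 file would start with \x89HDF:

with open("step-000029999.ckpt", "rb") as f:
    magic = f.read(8)
print(magic)
if magic.startswith(b"PK\x03\x04"):
    print("zip archive -> most likely torch.save, use torch.load")
elif magic.startswith(b"\x89HDF"):
    print("HDF5 -> most likely a Keras/h5py file")
else:
    print("unknown container format")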
Plain PyTorch opens it just fine:

import torch
import torch.nn as nn
checkpoint = torch.load("./step-000029999.ckpt")

# printing stuff for chadgpt
step = checkpoint['step']
pipeline = checkpoint['pipeline']
optimizers = checkpoint['optimizers']
scalers = checkpoint['scalers']

# printing for chadgpt to figure out architecture
print("scalers:", scalers)
print("step:", step)
for i in pipeline:
    if pipeline[i].ndim == 0:
        print(i, pipeline[i])
    else:
        print(i, pipeline[i].size())
for opt in optimizers:
    print("optimizer:", opt)
    print(optimizers[opt]["param_groups"])
    for opt_state_num, opt_state in optimizers[opt]["state"].items():
        print(opt_state_num, "step", opt_state["step"])
        print(opt_state_num, "exp_avg", opt_state["exp_avg"].size())
        print(opt_state_num, "exp_avg_sq", opt_state["exp_avg_sq"].size())
Feeding that dump to chadgpt gives a rough sketch of the architecture. The MLPs are left as placeholders; only their total parameter counts show up in the dump:

class Field(nn.Module):
    def __init__(self):
        super(Field, self).__init__()
        self.aabb = torch.Tensor(2, 3)
        self.embedding_appearance = nn.Linear(224, 32)
        # Direction encoding and position encoding are not specified in the output
        self.mlp_base = ...  # Define MLP base with 12,199,312 parameters
        self.mlp_head = ...  # Define MLP head with 9,216 parameters

class ProposalNetwork(nn.Module):
    def __init__(self):
        super(ProposalNetwork, self).__init__()
        self.aabb = torch.Tensor(2, 3)
        self.mlp_base = ...  # Define MLP base with 767,040 parameters

class CameraOptimizer(nn.Module):
    def __init__(self):
        super(CameraOptimizer, self).__init__()
        self.pose_adjustment = nn.Parameter(torch.Tensor(224, 6))

class RayGenerator(nn.Module):
    def __init__(self):
        super(RayGenerator, self).__init__()
        self.pose_adjustment = nn.Parameter(torch.Tensor(224, 6))

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.field = Field()
        self.proposal_networks = nn.ModuleList([ProposalNetwork(), ProposalNetwork()])
        self.train_camera_optimizer = CameraOptimizer()
        self.train_ray_generator = RayGenerator()
        self.eval_camera_optimizer = CameraOptimizer()
        self.eval_ray_generator = RayGenerator()

    def forward(self, x):
        pass  # Define the forward pass based on the model components
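One way to sanity-check a sketch like this against the real checkpoint (my addition; the "_model." prefix is an assumption about how the pipeline keys are namespaced and should be replaced with whatever the printed key names actually show) is a non-strict state-dict load and a look at what gets reported missing or unexpected:

model = Model()
stripped = {k[len("_model."):]: v
            for k, v in pipeline.items() if k.startswith("_model.")}
missing, unexpected = model.load_state_dict(stripped, strict=False)
print("missing keys:", missing)
print("unexpected keys:", unexpected)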
One gotcha: on a CPU-only device (like my WSL setup) you need map_location, otherwise the load fails:

checkpoint = torch.load("./step-000029999.ckpt", map_location=torch.device('cpu'))
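For reference (my addition, not from the original notes): without map_location, unpickling tensors that were saved on a GPU raises a RuntimeError telling you to pass map_location=torch.device('cpu'). The string form and a lambda remap are equivalent:

checkpoint = torch.load("./step-000029999.ckpt", map_location="cpu")
checkpoint = torch.load("./step-000029999.ckpt", map_location=lambda storage, loc: storage)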